In [2]:
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
import numpy as np
In [3]:
####### Variables #####"
IMAGE_SIZE = 256
BATCH_SIZE = 32
RGB_CHANNELS = 3
EPOCHS = 50 #50
In [4]:
imageDataset = tf.keras.preprocessing.image_dataset_from_directory(
"dataset/",
shuffle=True,
image_size= (IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE
)
Found 2152 files belonging to 3 classes.
In [6]:
class_names = imageDataset.class_names
class_names
Out[6]:
['Potato___Early_blight', 'Potato___Late_blight', 'Potato___healthy']
In [7]:
len(imageDataset)
Out[7]:
68
In [8]:
for image_batch, label_batch in imageDataset.take(1):
#print(image_batch.shape)
#print(label_batch.numpy())
print(image_batch[0].shape)
(256, 256, 3)
In [9]:
plt.figure(figsize=(15, 15))
for image_batch, label_batch in imageDataset.take(1):
"""plt.imshow(image_batch[0].numpy().astype("uint8"))
plt.title(class_names[label_batch[0]])"""
for i in range(12):
ax = plt.subplot(3, 4, i+1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[label_batch[i]])
plt.axis("off")
In [ ]:
######## Split our dataset
# 80% ==> training
# 20% ==> 10% validation, 10% test
In [10]:
train_size = 0.8
len(imageDataset)*train_size # 80% of our dataset
Out[10]:
54.400000000000006
In [11]:
train_set = imageDataset.take(54)
len(train_set)
Out[11]:
54
In [12]:
test_set = imageDataset.skip(54)
len(test_set)
Out[12]:
14
In [13]:
validation_size = 0.1
len(imageDataset)*validation_size
Out[13]:
6.800000000000001
In [14]:
validation_set = test_set.take(6)
len(validation_set)
Out[14]:
6
In [15]:
test_set = test_set.skip(len(validation_set))
len(test_set)
Out[15]:
8
In [16]:
def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000 ):
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=10)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_set = ds.take(train_size)
val_set = ds.skip(train_size).take(val_size)
test_set = ds.skip(train_size).skip(val_size)
return train_set, validation_set, test_set
In [17]:
train_set, validation_set, test_set = get_dataset_partitions_tf(imageDataset)
In [18]:
len(train_set)
Out[18]:
54
In [ ]:
# catching and fetching
In [19]:
# read image from disk and put it in cache for next usages
train_set = train_set.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
validation_set = validation_set.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
test_set = test_set.cache().shuffle(1000).prefetch(buffer_size = tf.data.AUTOTUNE)
In [ ]:
# some preprocessing
In [20]:
resize_and_rescale = tf.keras.Sequential([
layers.experimental.preprocessing.Resizing(IMAGE_SIZE, IMAGE_SIZE),
layers.experimental.preprocessing.Rescaling(1.0/255)
])
In [ ]:
# Data augmentation
In [21]:
data_augmentation = tf.keras.Sequential([
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
layers.experimental.preprocessing.RandomRotation(0.2),
])
In [ ]:
# Build, train and save the Model : CNN Model
In [22]:
input_shape = (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNELS)
nbr_classes = 3
In [23]:
model = models.Sequential([
resize_and_rescale,
data_augmentation,
layers.Conv2D(32, kernel_size = (3,3), activation='relu', input_shape = input_shape ),
layers.MaxPooling2D((2,2)),
layers.Conv2D(64, kernel_size = (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.Conv2D(64, kernel_size = (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.Conv2D(64, kernel_size = (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.Conv2D(64, kernel_size = (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.Conv2D(64, kernel_size = (3,3), activation='relu'),
layers.MaxPooling2D((2,2)),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(nbr_classes, activation='softmax'),
])
model.build(input_shape = input_shape)
In [24]:
model.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
sequential (Sequential) (32, 256, 256, 3) 0
sequential_1 (Sequential) (32, 256, 256, 3) 0
conv2d (Conv2D) (32, 254, 254, 32) 896
max_pooling2d (MaxPooling2 (32, 127, 127, 32) 0
D)
conv2d_1 (Conv2D) (32, 125, 125, 64) 18496
max_pooling2d_1 (MaxPoolin (32, 62, 62, 64) 0
g2D)
conv2d_2 (Conv2D) (32, 60, 60, 64) 36928
max_pooling2d_2 (MaxPoolin (32, 30, 30, 64) 0
g2D)
conv2d_3 (Conv2D) (32, 28, 28, 64) 36928
max_pooling2d_3 (MaxPoolin (32, 14, 14, 64) 0
g2D)
conv2d_4 (Conv2D) (32, 12, 12, 64) 36928
max_pooling2d_4 (MaxPoolin (32, 6, 6, 64) 0
g2D)
conv2d_5 (Conv2D) (32, 4, 4, 64) 36928
max_pooling2d_5 (MaxPoolin (32, 2, 2, 64) 0
g2D)
flatten (Flatten) (32, 256) 0
dense (Dense) (32, 64) 16448
dense_1 (Dense) (32, 3) 195
=================================================================
Total params: 183747 (717.76 KB)
Trainable params: 183747 (717.76 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
In [25]:
#Compile for uptimization
model.compile(
optimizer='adam',
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics = ['accuracy']
)
In [26]:
history = model.fit(
train_set,
epochs = EPOCHS,
batch_size = BATCH_SIZE,
verbose = 1,
validation_data = validation_set,
)
Epoch 1/50 54/54 [==============================] - 28s 503ms/step - loss: 0.9097 - accuracy: 0.4671 - val_loss: 0.8448 - val_accuracy: 0.5469 Epoch 2/50 54/54 [==============================] - 42s 785ms/step - loss: 0.7673 - accuracy: 0.6461 - val_loss: 0.4900 - val_accuracy: 0.7917 Epoch 3/50 54/54 [==============================] - 46s 847ms/step - loss: 0.5755 - accuracy: 0.7694 - val_loss: 0.3885 - val_accuracy: 0.8125 Epoch 4/50 54/54 [==============================] - 46s 849ms/step - loss: 0.4174 - accuracy: 0.8257 - val_loss: 0.2254 - val_accuracy: 0.9271 Epoch 5/50 54/54 [==============================] - 46s 848ms/step - loss: 0.2640 - accuracy: 0.8950 - val_loss: 0.1732 - val_accuracy: 0.9427 Epoch 6/50 54/54 [==============================] - 46s 852ms/step - loss: 0.2458 - accuracy: 0.9085 - val_loss: 0.3344 - val_accuracy: 0.8542 Epoch 7/50 54/54 [==============================] - 46s 847ms/step - loss: 0.2306 - accuracy: 0.9102 - val_loss: 0.1159 - val_accuracy: 0.9688 Epoch 8/50 54/54 [==============================] - 46s 845ms/step - loss: 0.1964 - accuracy: 0.9255 - val_loss: 0.1805 - val_accuracy: 0.8958 Epoch 9/50 54/54 [==============================] - 46s 848ms/step - loss: 0.1783 - accuracy: 0.9313 - val_loss: 0.1928 - val_accuracy: 0.9271 Epoch 10/50 54/54 [==============================] - 46s 846ms/step - loss: 0.2333 - accuracy: 0.9155 - val_loss: 0.1995 - val_accuracy: 0.9062 Epoch 11/50 54/54 [==============================] - 46s 845ms/step - loss: 0.1819 - accuracy: 0.9302 - val_loss: 0.1004 - val_accuracy: 0.9583 Epoch 12/50 54/54 [==============================] - 46s 846ms/step - loss: 0.1909 - accuracy: 0.9225 - val_loss: 0.0844 - val_accuracy: 0.9844 Epoch 13/50 54/54 [==============================] - 46s 845ms/step - loss: 0.1616 - accuracy: 0.9384 - val_loss: 0.0887 - val_accuracy: 0.9583 Epoch 14/50 54/54 [==============================] - 46s 845ms/step - loss: 0.1502 - accuracy: 0.9396 - val_loss: 0.2659 - val_accuracy: 0.8854 Epoch 15/50 54/54 [==============================] - 46s 846ms/step - loss: 0.1228 - accuracy: 0.9519 - val_loss: 0.1184 - val_accuracy: 0.9479 Epoch 16/50 54/54 [==============================] - 46s 844ms/step - loss: 0.1232 - accuracy: 0.9583 - val_loss: 0.1153 - val_accuracy: 0.9635 Epoch 17/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0857 - accuracy: 0.9677 - val_loss: 0.0595 - val_accuracy: 0.9688 Epoch 18/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0907 - accuracy: 0.9677 - val_loss: 0.1301 - val_accuracy: 0.9323 Epoch 19/50 54/54 [==============================] - 46s 853ms/step - loss: 0.0747 - accuracy: 0.9736 - val_loss: 0.3579 - val_accuracy: 0.8854 Epoch 20/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0817 - accuracy: 0.9665 - val_loss: 0.1707 - val_accuracy: 0.9219 Epoch 21/50 54/54 [==============================] - 46s 846ms/step - loss: 0.0708 - accuracy: 0.9707 - val_loss: 0.1766 - val_accuracy: 0.9427 Epoch 22/50 54/54 [==============================] - 46s 850ms/step - loss: 0.0732 - accuracy: 0.9742 - val_loss: 0.0701 - val_accuracy: 0.9688 Epoch 23/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0939 - accuracy: 0.9648 - val_loss: 0.3314 - val_accuracy: 0.8542 Epoch 24/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0770 - accuracy: 0.9742 - val_loss: 0.1303 - val_accuracy: 0.9427 Epoch 25/50 54/54 [==============================] - 46s 846ms/step - loss: 0.0805 - accuracy: 0.9701 - val_loss: 0.1726 - val_accuracy: 0.9375 Epoch 26/50 54/54 [==============================] - 46s 849ms/step - loss: 0.0701 - accuracy: 0.9759 - val_loss: 0.0793 - val_accuracy: 0.9688 Epoch 27/50 54/54 [==============================] - 46s 857ms/step - loss: 0.0860 - accuracy: 0.9677 - val_loss: 0.0530 - val_accuracy: 0.9792 Epoch 28/50 54/54 [==============================] - 46s 853ms/step - loss: 0.0373 - accuracy: 0.9859 - val_loss: 0.0288 - val_accuracy: 0.9792 Epoch 29/50 54/54 [==============================] - 46s 843ms/step - loss: 0.0506 - accuracy: 0.9800 - val_loss: 0.0456 - val_accuracy: 0.9896 Epoch 30/50 54/54 [==============================] - 46s 846ms/step - loss: 0.0338 - accuracy: 0.9883 - val_loss: 0.0274 - val_accuracy: 0.9896 Epoch 31/50 54/54 [==============================] - 46s 846ms/step - loss: 0.0400 - accuracy: 0.9859 - val_loss: 0.0494 - val_accuracy: 0.9792 Epoch 32/50 54/54 [==============================] - 46s 847ms/step - loss: 0.0355 - accuracy: 0.9824 - val_loss: 0.1104 - val_accuracy: 0.9531 Epoch 33/50 54/54 [==============================] - 46s 853ms/step - loss: 0.0761 - accuracy: 0.9718 - val_loss: 0.0464 - val_accuracy: 0.9740 Epoch 34/50 54/54 [==============================] - 46s 849ms/step - loss: 0.0338 - accuracy: 0.9883 - val_loss: 0.0391 - val_accuracy: 0.9844 Epoch 35/50 54/54 [==============================] - 46s 852ms/step - loss: 0.0302 - accuracy: 0.9906 - val_loss: 0.0561 - val_accuracy: 0.9844 Epoch 36/50 54/54 [==============================] - 46s 859ms/step - loss: 0.0570 - accuracy: 0.9783 - val_loss: 0.1028 - val_accuracy: 0.9740 Epoch 37/50 54/54 [==============================] - 46s 851ms/step - loss: 0.0486 - accuracy: 0.9830 - val_loss: 0.0309 - val_accuracy: 0.9896 Epoch 38/50 54/54 [==============================] - 46s 850ms/step - loss: 0.0821 - accuracy: 0.9701 - val_loss: 0.0282 - val_accuracy: 0.9844 Epoch 39/50 54/54 [==============================] - 46s 846ms/step - loss: 0.0646 - accuracy: 0.9707 - val_loss: 0.0689 - val_accuracy: 0.9688 Epoch 40/50 54/54 [==============================] - 46s 847ms/step - loss: 0.0438 - accuracy: 0.9830 - val_loss: 0.0485 - val_accuracy: 0.9792 Epoch 41/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0688 - accuracy: 0.9777 - val_loss: 0.0488 - val_accuracy: 0.9792 Epoch 42/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0353 - accuracy: 0.9883 - val_loss: 0.0373 - val_accuracy: 0.9896 Epoch 43/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0388 - accuracy: 0.9883 - val_loss: 0.0561 - val_accuracy: 0.9688 Epoch 44/50 54/54 [==============================] - 46s 850ms/step - loss: 0.0195 - accuracy: 0.9918 - val_loss: 0.0477 - val_accuracy: 0.9792 Epoch 45/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0452 - accuracy: 0.9836 - val_loss: 0.0352 - val_accuracy: 0.9896 Epoch 46/50 54/54 [==============================] - 46s 848ms/step - loss: 0.0215 - accuracy: 0.9912 - val_loss: 0.0173 - val_accuracy: 0.9948 Epoch 47/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0357 - accuracy: 0.9894 - val_loss: 0.0537 - val_accuracy: 0.9792 Epoch 48/50 54/54 [==============================] - 46s 844ms/step - loss: 0.0316 - accuracy: 0.9871 - val_loss: 0.0486 - val_accuracy: 0.9844 Epoch 49/50 54/54 [==============================] - 46s 845ms/step - loss: 0.0100 - accuracy: 0.9959 - val_loss: 0.0569 - val_accuracy: 0.9740 Epoch 50/50 54/54 [==============================] - 46s 853ms/step - loss: 0.0336 - accuracy: 0.9883 - val_loss: 0.0651 - val_accuracy: 0.9792
In [27]:
scores = model.evaluate(test_set)
8/8 [==============================] - 2s 138ms/step - loss: 0.0527 - accuracy: 0.9805
In [28]:
scores
Out[28]:
[0.052731577306985855, 0.98046875]
In [29]:
history.history.keys()
Out[29]:
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
In [31]:
len(history.history['accuracy']) # equal to number of EPOCH
Out[31]:
50
In [32]:
accs = history.history['accuracy']
val_accs = history.history['val_accuracy']
losses = history.history['loss']
val_losses = history.history['val_loss']
In [36]:
# Training and Validation Accuracy Plot
plt.figure(figsize=(8,8))
plt.subplot(1,2,1)
plt.plot(range(EPOCHS), accs, label = 'Training Accuracy')
plt.plot(range(EPOCHS), val_accs, label = 'Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
# Training loss Plot
#plt.figure(figsize=(8,8))
plt.subplot(1,2,2)
plt.plot(range(EPOCHS), losses, label = 'Training loss')
plt.plot(range(EPOCHS), val_losses, label = 'Validation loss')
plt.legend(loc='upper right')
plt.title('Training and Validation loss')
plt.show()
In [39]:
for images_batch, labels_batch in test_set.take(1):
first_image = images_batch[0].numpy().astype('uint8')
first_label = labels_batch[0].numpy()
print("first image to predict")
plt.imshow(first_image)
print("actual label:", class_names[first_label])
batch_prediction = model.predict(image_batch)
#print(np.argmax(batch_prediction[0]))
print('predicted label :', class_names[np.argmax(batch_prediction[0])] )
first image to predict actual label: Potato___Early_blight 1/1 [==============================] - 0s 164ms/step predicted label : Potato___Late_blight
In [ ]:
In [41]:
def predict(model, img):
img_array = tf.keras.preprocessing.image.img_to_array(images[i].numpy())
img_array = tf.expand_dims(img_array, 0) # create a batch
predictions = model.predict(img_array)
predicted_class = class_names[np.argmax(predictions[0])]
confidence = round(100 * (np.max(predictions[0])), 2 )
return predicted_class, confidence
In [44]:
plt.figure(figsize=(15, 15))
for images, labels in test_set.take(1):
for i in range(9):
ax = plt.subplot(3,3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
predicted_class, confidence = predict(model, images[i].numpy())
actual_class = class_names[labels[i]]
plt.title(f"Actual: {actual_class}, \n Predicted: {predicted_class},\n Confidence: {confidence}%")
plt.axis('off')
1/1 [==============================] - 0s 21ms/step 1/1 [==============================] - 0s 18ms/step 1/1 [==============================] - 0s 19ms/step 1/1 [==============================] - 0s 20ms/step 1/1 [==============================] - 0s 19ms/step 1/1 [==============================] - 0s 20ms/step 1/1 [==============================] - 0s 19ms/step 1/1 [==============================] - 0s 18ms/step 1/1 [==============================] - 0s 17ms/step
In [ ]:
# Save the models
In [ ]:
# Save the models
In [60]:
import os
model_version = 2#max([int(i) for i in os.listdir("../models") + [0]])+1
In [61]:
model.save(f"../models/{model_version}")
INFO:tensorflow:Assets written to: ../models/2/assets
INFO:tensorflow:Assets written to: ../models/2/assets
In [ ]: